Advanced Lane Finding Project

The goals / steps of this project are the following:

  • Compute the camera calibration matrix and distortion coefficients given a set of chessboard images.
  • Apply a distortion correction to raw images.
  • Use color transforms, gradients, etc., to create a thresholded binary image.
  • Apply a perspective transform to rectify binary image ("birds-eye view").
  • Detect lane pixels and fit to find the lane boundary.
  • Determine the curvature of the lane and vehicle position with respect to center.
  • Warp the detected lane boundaries back onto the original image.
  • Output visual display of the lane boundaries and numerical estimation of lane curvature and vehicle position.

First, I'll compute the camera calibration using chessboard images

In [1]:
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
%matplotlib qt
%matplotlib inline
import pickle
from scipy import signal
from collections import deque
from moviepy.editor import VideoFileClip
from IPython.display import HTML
from scipy import signal
In [2]:
def draw_corners(x, y): 

    objp = np.zeros((x*y,3), np.float32)
    objp[:,:2] = np.mgrid[0:x,0:y].T.reshape(-1,2)
    # Arrays to store object points and image points from all the images.
    objpoints = [] # 3d points in real world space
    imgpoints = [] # 2d points in image plane.

    for fname in glob.glob("camera_cal/calibration*.jpg"):
        img = cv2.imread(fname)
        grayscaled = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        ret, corners = cv2.findChessboardCorners(grayscaled, (x, y), None)
        if ret is True:
            imgpoints.append(corners)
            objpoints.append(objp)
            cv2.drawChessboardCorners(img, (x, y), corners, ret)
            f, (ax1, ax2) = plt.subplots(1, 2, figsize=(8,4))
            ax1.imshow(cv2.cvtColor(mpimg.imread(fname), cv2.COLOR_BGR2RGB))
            ax1.set_title('Original image', fontsize=18)
            ax2.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
            ax2.set_title('Image with corners', fontsize=18)
    
    return (objpoints, imgpoints)
In [3]:
(object_pts, img_points) = draw_corners(9,6)
In [6]:
for fname in glob.glob("camera_cal/calibration*.jpg"):
    undistort(fname, object_pts, img_points)
In [7]:
for fname in glob.glob("test_images/test*.jpg"):
    undistort(fname, object_pts, img_points)
In [5]:
def undistort(img, object_pts, img_points, should_display=True):
    if isinstance(img, str):
        img = cv2.imread(img)
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera( \
        object_pts, img_points, (img.shape[1], img.shape[0]),None,None \
    )
    dst = cv2.undistort(img, mtx, dist, None, mtx)
    
    if should_display is True:
        f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10))
        ax1.imshow(img) 
        ax1.set_title('Original image', fontsize=30)
        ax2.imshow(dst)
        ax2.set_title('Undistored image', fontsize=30)
    return dst

def apply_birds_eye(img, should_display=True):
    img_shape = (img.shape[1], img.shape[0])
    
    src = np.float32(SRC_PTS)
    dst = np.float32(DST_PTS)
    
    M = cv2.getPerspectiveTransform(src, dst)
    Minv = cv2.getPerspectiveTransform(dst, src)
    warped = cv2.warpPerspective(img, M, img_shape)
    
    if should_display is True:
        f, (ax1, ax2) = plt.subplots(1, 2, figsize=(9, 6))
        f.tight_layout()
        ax1.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
        ax1.set_title('Undistorted Image', fontsize=20)
        ax2.imshow(cv2.cvtColor(warped, cv2.COLOR_BGR2RGB))
        ax2.set_title('Warped Image', fontsize=20)
        plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
    return warped, M, Minv

def apply_binary_thresholds(img, thresholds={  \
      's': {'min': 180, 'max': 255}, \
      'l': {'min': 255, 'max': 255},   \
      'b': {'min': 155, 'max': 200}  \
    } , should_display=True): 
    
    S = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)[:,:,2]  
    L = cv2.cvtColor(img, cv2.COLOR_BGR2LUV)[:,:,0]
    B = cv2.cvtColor(img, cv2.COLOR_BGR2Lab)[:,:,2] 

    s_bin = np.zeros_like(S)
    s_bin[(S >= thresholds['s']['min']) & (S <= thresholds['s']['max'])] = 1
    b_bin = np.zeros_like(B)
    b_bin[(B >= thresholds['b']['min']) & (B <= thresholds['b']['max'])] = 1
    l_bin = np.zeros_like(L)
    l_bin[(L >= thresholds['l']['min']) & (L <= thresholds['l']['max'])] = 1
    
    full_bin = np.zeros_like(s_bin)
    full_bin[(l_bin == 1) | (b_bin == 1) | (s_bin == 1)] = 1

    if should_display is True:
        f, (ax1, ax2) = plt.subplots(1, 2, figsize=(9, 6))
        f.tight_layout()
        ax1.set_title('original image', fontsize=16)
        ax1.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB).astype('uint8'))
        ax2.set_title('all thresholds', fontsize=16)
        ax2.imshow(full_bin, cmap='gray')
        
    return full_bin
In [8]:
SRC_PTS = [[490, 482],[810, 482],[1250, 720], [40, 720]]
DST_PTS = [[0, 0], [1280, 0],[1250, 720], [40, 720]]

orig = cv2.imread("test_images/test4.jpg")
undistorted = undistort(orig, object_pts, img_points, should_display=False)
warped, M, Minv = apply_birds_eye(undistorted, should_display=True)
In [9]:
def apply_binary_thresholds(img, thresholds={  \
      's': {'min': 180, 'max': 255}, \
      'l': {'min': 255, 'max': 255},   \
      'b': {'min': 155, 'max': 200}  \
    } , should_display=True): 
    
    S = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)[:,:,2]  
    L = cv2.cvtColor(img, cv2.COLOR_BGR2LUV)[:,:,0]
    B = cv2.cvtColor(img, cv2.COLOR_BGR2Lab)[:,:,2] 

    s_bin = np.zeros_like(S)
    s_bin[(S >= thresholds['s']['min']) & (S <= thresholds['s']['max'])] = 1
    b_bin = np.zeros_like(B)
    b_bin[(B >= thresholds['b']['min']) & (B <= thresholds['b']['max'])] = 1
    l_bin = np.zeros_like(L)
    l_bin[(L >= thresholds['l']['min']) & (L <= thresholds['l']['max'])] = 1
    
    full_bin = np.zeros_like(s_bin)
    full_bin[(l_bin == 1) | (b_bin == 1) | (s_bin == 1)] = 1

    if should_display is True:
        f, (ax1, ax2) = plt.subplots(1, 2, figsize=(9, 6))
        f.tight_layout()
        ax1.set_title('original image', fontsize=16)
        ax1.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB).astype('uint8'))
        ax2.set_title('all thresholds', fontsize=16)
        ax2.imshow(full_bin, cmap='gray')
        
    return full_bin
In [10]:
b_eye_bin = apply_binary_thresholds(warped)
In [11]:
def polynomial_lines(y, ms):
    return ms[0]*y**2 + ms[1]*y + ms[2]

def draw_polynomial(image, fn, ms, steps):
    pps = image.shape[0] // steps

    for i in range(steps):
        l = i * pps
        r = l + pps

        lp = (int(fn(l, ms)), l)
        rp = (int(fn(r, ms)), r)

        if i % 2 == 1:
            image = cv2.line(image, rp, lp, [255, 0, 0], 10)

    return image

def fit_polynomial(x, y):
    res = np.polyfit(x, y, 2)
    fx = res[0]*x**2 + res[1]*x + res[2]
    return fx, res

def multi_flatten(lx, ly, rx, ry):
    return ( \
        np.array([x \
             for i in lx \
             for x in i]), \
        np.array([x \
             for i in ly \
             for x in i]), \
        np.array([x \
             for i in rx \
             for x in i]), \
        np.array([x \
             for i in ry \
             for x in i]), \
    )

def get_pxs(img, xm, ym, size):
    window = img[ \
        int( \
            ym - size \
        ):int( \
            ym + size \
        ), int( \
            xm - size \
        ):int( \
            xm + size \
        ) \
    ]

    x, y = (window.T == 1).nonzero()
    return x + xm - size, y + ym - size

def histo(img, offset=50, steps=6,
                     g_rad=200, kernal_size=51,
                     x_offset=50):
    
    height = img.shape[0]
    o_height = height - offset
    width = img.shape[1]
    half = img.shape[1] // 2
    pps = o_height / steps
    
    lx_arr = []
    ly_arr = []
    rx_arr = []
    ry_arr = []

    for step in range(steps):
        lxm = []
        rxm = []
        ym = []
        ly = height - (step * pps) + offset
        ry = ly - pps + offset
        print("ly : ", ly)
        print("ry : ", ry)

        hist = np.sum( \
            img[ \
                int( \
                     ry \
                ):int( \
                    ly \
                ), int(x_offset):int(width - x_offset) \
            ], axis=0 \
        )

        smoothened = signal.medfilt(hist, kernal_size)

        lt = np.array(signal.find_peaks_cwt(smoothened[:half], np.arange(1, 10)))
        rt = np.array(signal.find_peaks_cwt(smoothened[half:], np.arange(1, 10)))
        
        print("lt", lt)
        print("rt", rt)

        if len(lt) > 0:
            lxm.append(max(lt))

        if len(rt) > 0:
            rxm.append(max(rt) + half)

        if len(lt) > 0 or len(rt) > 0:
            ym.append((ly + ry) // 2)
            
        for lx_centre, centre_y in zip(lxm, ym):
            print("lx_centre : ", lx_centre)
            print("centre_y : ", centre_y)
            print("lxm : ", lxm)
            print("ym : ", ym)
            left_x_additional, left_y_additional = get_pxs(img, lx_centre,
                                                                       centre_y, g_rad // 2)

            lx_arr.append(left_x_additional)
            ly_arr.append(left_y_additional)
    
        for rx_centre, centre_y in zip(rxm, ym):
            right_x_additional, right_y_additional = get_pxs(img, rx_centre,
                                                                         centre_y, g_rad // 2)
            
            rx_arr.append(right_x_additional)
            ry_arr.append(right_y_additional)

    if len(rx_arr) == 0 or len(lx_arr) == 0:
        x_offset = 0

        lx_arr = []
        ly_arr = []
        rx_arr = []
        ry_arr = []

        for step in range(steps):
            lxm = []
            rxm = []
            ym = []

            ly = height - (step * pps) + offset
            ry = ly - pps + offset

            hist = np.sum(
                img[
                    int(ry):int(ly),
                    int(x_offset):int(width - x_offset)
                ], axis=0)

            smoothened = signal.medfilt(hist, kernal_size)
            lt = np.array(signal.find_peaks_cwt(smoothened[:half], np.arange(1, 10)))
            rt = np.array(signal.find_peaks_cwt(smoothened[half:], np.arange(1, 10)))
            if len(lt) > 0:
                lxm.append(max(lt))

            if len(rt) > 0:
                rxm.append(max(rt) + half)

            if len(lt) > 0 or len(rt) > 0:
                ym.append((ly + ry) // 2)

            for lx_centre, centre_y in zip(lxm, ym):
                lx_additional, ly_additional = get_pxs(img, lx_centre,
                                                                           centre_y, g_rad // 2)
                print('\n left_x_additional : \n ')
                print(left_x_additional)
                print('\n left_y_additional : \n')
                print(left_y_additional)
                lx_arr.append(lx_additional)
                ly_arr.append(ly_additional)
                print('\n ly_arr : \n ')
                print(ly_arr)
                print(' \n lx_arr : \n')
                print(lx_arr)

            for rx_centre, centre_y in zip(rxm, ym):

                right_x_additional, right_y_additional = get_pxs(img, rx_centre,
                                                                             centre_y, g_rad // 2)
                print('right_x_additional : ',right_x_additional)
                print('right_y_additional : ', right_y_additional)
                rx_arr.append(right_x_additional)
                ry_arr.append(right_y_additional)
                print('ry_arr : ',ry_arr)
                print('rx_arr : ',rx_arr)
                
    plt.plot(hist)
    return multi_flatten(lx_arr, ly_arr, rx_arr, ry_arr)
In [12]:
pps = (orig.shape[0] - 50) / 6
g_rad = 300
kernal_size = 51
x_offset = 40
slate = np.zeros((720, 1280))
c_slate = cv2.cvtColor(slate.astype(np.uint8), cv2.COLOR_GRAY2RGB)
shape = orig.shape
half = orig.shape[1] // 2


lx, ly, rx, ry = histo(b_eye_bin, x_offset=x_offset)
                      
print(lx, ly, rx, ry)
ym_per_pix = 30./720 # meters per pixel in y dimension
xm_per_pix = 3.7/700 # meteres per pixel in x dimension
lf, lcs = fit_polynomial(ly, lx)
rf, rcs = fit_polynomial(ry, rx)

plt.plot(lf, ly, color='red', linewidth=3)
plt.plot(rf, ry, color='red', linewidth=3)

#polyfit_left = draw_polynomial(slate, lane_poly, lcs, 30)
b_eye_poly = draw_polynomial(draw_polynomial(slate, polynomial_lines, lcs, 30), polynomial_lines, rcs, 30)
cv2.imwrite('output_images/test1_birdseye_poly.jpg',b_eye_poly)
plt.imshow(b_eye_poly, cmap="gray")
ly :  770.0
ry :  708.3333333333334
lt []
rt []
ly :  658.3333333333334
ry :  596.6666666666667
lt [217 241]
rt [488 506]
lx_centre :  241
centre_y :  627.0
lxm :  [241]
ym :  [627.0]
ly :  546.6666666666666
ry :  484.99999999999994
lt [215 246]
rt []
lx_centre :  246
centre_y :  515.0
lxm :  [246]
ym :  [515.0]
ly :  435.0
ry :  373.3333333333333
lt [230 231 261]
rt []
lx_centre :  261
centre_y :  404.0
lxm :  [261]
ym :  [404.0]
ly :  323.3333333333333
ry :  261.66666666666663
lt [234 265]
rt [423 442]
lx_centre :  265
centre_y :  292.0
lxm :  [265]
ym :  [292.0]
ly :  211.66666666666663
ry :  149.99999999999994
lt [233 267]
rt []
lx_centre :  267
centre_y :  180.0
lxm :  [267]
ym :  [180.0]
[148 149 149 ..., 312 312 313] [ 692.  692.  693. ...,  199.  200.  188.] [1163 1164 1164 ..., 1140 1140 1140] [ 625.  624.  625. ...,  380.  381.  384.]
Out[12]:
<matplotlib.image.AxesImage at 0xcc23e80>
In [13]:
def paint_over_lane_lines(canvas, lcs, rcs):
    for i in range(0, 720):
        canvas[i][int(polynomial_lines(i, lcs)):int(polynomial_lines(i, rcs))] = 1
    return canvas

painted_b_eye = c_slate
painted_b_eye[b_eye_poly > 1] = [0,0,255]
painted_area = paint_over_lane_lines(slate, lcs, rcs)
painted_b_eye[painted_area == 1] = [0,255,0]
cv2.imwrite('output_images/test1_birdseye_paintedover.jpg',painted_b_eye)
plt.imshow(painted_b_eye)
Out[13]:
<matplotlib.image.AxesImage at 0x143d4240>
In [14]:
def annotate(img, curvature, pos, curve_min):
    font = cv2.FONT_HERSHEY_SIMPLEX
    cv2.putText(img, 'curvature radius = %d(m)' % (curvature / 128 * 3.7), (50, 50), font, 1, (255, 255, 255), 2)
    cv2.putText(img, '%.2fm %s of center' % (np.abs(pos / 12800 * 3.7), "left"), (50, 100), font, 1,
                (255, 255, 255), 2)
    cv2.putText(img, 'curvature minimum radius = %d(m)' % (curve_min / 128 * 3.7), (50, 150), font, 1, (255, 255, 255), 2)

def pos_cen(y, left_poly, right_poly):
    return (1.5 * polynomial_lines(y, left_poly)
              - polynomial_lines(y, right_poly)) / 2

# Define conversions in x and y from pixels space to meters
ym_per_pix = 30/720 # meters per pixel in y dimension
xm_per_pix = 3.7/700 # meters per pixel in x dimension

# Fit new polynomials to x,y in world space
left_fit_cr = np.polyfit(ly*ym_per_pix, lx*xm_per_pix, 2)
right_fit_cr = np.polyfit(ry*ym_per_pix, rx*xm_per_pix, 2)
# Calculate the new radii of curvature
#left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])
#right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])

left_curverad = ((1 + (2*left_fit_cr[0]*np.max(ly) + left_fit_cr[1])**2)**1.5) \
                                 /np.absolute(2*left_fit_cr[0])
right_curverad = ((1 + (2*right_fit_cr[0]*np.max(ly) + right_fit_cr[1])**2)**1.5) \
                                    /np.absolute(2*right_fit_cr[0])

# Now our radius of curvature is in meters
print(' left curve radius  ',left_curverad, 'm', ' ,right curve radius ', right_curverad, 'm')
 left curve radius   2382.930689 m  ,right curve radius  1860.51522847 m
In [17]:
def annotate(img, curvature, pos, curve_min):
    font = cv2.FONT_HERSHEY_SIMPLEX
    cv2.putText(img, 'curvature radius = %d(m)' % (curvature ), (50, 50), font, 1, (255, 255, 255), 2)
    cv2.putText(img, '%.2fm %s of center' % (np.abs(pos), "left"), (50, 100), font, 1,
                (255, 255, 255), 2)
    cv2.putText(img, 'curvature minimum radius = %d(m)' % (curve_min ), (50, 150), font, 1, (255, 255, 255), 2)

def pos_cen(y, left_poly, right_poly):
    return (1.5 * polynomial_lines(y, left_poly)
              - polynomial_lines(y, right_poly)) / 2

#lc_radius = np.absolute(((1 + (2 * lcs[0] * 500 + lcs[1])**2) ** 1.5) \
#                /(2 * lcs[0]))
#rc_radius = np.absolute(((1 + (2 * rcs[0] * 500 + rcs[1]) ** 2) ** 1.5) \
#                 /(2 * rcs[0]))

ll_img = cv2.add( \
    cv2.warpPerspective( \
        painted_b_eye, Minv, (shape[1], shape[0]), flags=cv2.INTER_LINEAR \
    ), undistorted \
) 
plt.imshow(ll_img)
annotate(ll_img, curvature=(left_curverad + right_curverad) / 2, 
                     pos=pos_cen(719, lcs, rcs), 
                     curve_min=min(left_curverad, right_curverad))
plt.imshow(ll_img)
Out[17]:
<matplotlib.image.AxesImage at 0x9f94e10>
In [18]:
#reference https://github.com/JustinHeaton/Advanced-Lane-Finding/blob/master/Lane_Finding.ipynb
class Line:
    def __init__(self):
        self.found_in_prev_frame = False
        self.MAX_LENGTH = 10
        self.prev_x = None
        self.prec_y = None
        self.x_intercept = deque(maxlen=self.MAX_LENGTH)
        self.high = deque(maxlen=self.MAX_LENGTH)
        self.lastx_intercept = None
        self.last_high = None
        self.curve_radius = None
        self.c0 = deque(maxlen=self.MAX_LENGTH)
        self.c1 = deque(maxlen=self.MAX_LENGTH)
        self.c2 = deque(maxlen=self.MAX_LENGTH)
        self.polyx = None
        self.pts = []
        self.num_frames = 0
        self.pixel_search_window = 25
        
    def directed_search(self, x_pts, y_pts):
        DECREMENT_BY = 90
        HALF = 640
        height = 720
        xs = []
        ys = []
        if self.found_in_prev_frame == True: 
            B = HALF - 10
            while B >= 0:
                
                y = np.mean([height,B])
                x = ( \
                    (np.mean(self.c0))*y**2  \
                    + (np.mean(self.c1))*y \
                    + (np.mean(self.c2)) \
                )
                
                found = np.where( \
                    self.found_x_in_window(x, x_pts) \
                    & \
                    self.found_y_in_window(y_pts, B, height) \
                )
                
                x_window, y_window = x_pts[found], y_pts[found]
                
                if np.sum(x_window) != 0:
                    np.append(xs, x_window)
                    np.append(ys, y_window)
                height -= DECREMENT_BY
                B -= DECREMENT_BY
                
        if np.sum(xs) == 0: 
            self.found_in_prev_frame = False 
        return xs, ys, self.found_in_prev_frame
    
    def blind_histogram_search(self, x_pts, y_pts, img):
        
        DECREMENT_BY = 90
        HALF = img.shape[1] // 2
        height = img.shape[0]
        xs = []
        ys = []
        
        if self.found_in_prev_frame == False: 
            B = HALF - 10
            while B >= 0:
                
                hist = np.sum(img[B:height,:], axis=0)
                
                if self == Right_Lane:
                    peak = np.argmax(hist[HALF:]) + HALF
                else:
                    peak = np.argmax(hist[:HALF])
                    
                found = np.where( \
                    self.found_x_in_window(peak, x_pts) \
                    & \
                    self.found_y_in_window(y_pts, B, height) \
                )
                
                x_window, y_window = x_pts[found], y_pts[found]
                
                if np.sum(x_window) != 0:
                    xs.extend(x_window)
                    ys.extend(y_window)
                height -= DECREMENT_BY
                B -= DECREMENT_BY
                
        if np.sum(xs) > 0:
            self.found_in_prev_frame = True
        else:
            ys = self.prev_y
            xs = self.prev_x
        return xs, ys, self.found_in_prev_frame
    
    def found_x_in_window(self, peak, pts):
        return ( \
            (pts < (peak + self.pixel_search_window)) \
            & \
            ((peak - self.pixel_search_window) < pts) \
        )
    
    def found_y_in_window(self, pts, B, height):
        return ( \
            (pts > B) & (pts < height) \
        )
    
    def curve_rad(self, xs, ys):
        fit = np.polyfit( \
            ys*30. / 720, \
            xs*3.7 / 700, 2 \
        )
        return ((1 + (2*fit[0]*np.max(ys) + fit[1])**2)**1.5) / np.absolute(2*fit[0])
    
    def sort(self, xs, ys):
        s = np.argsort(ys)
        return xs[s], ys[s]


def poly_intercepts(poly):
    return ( 
        poly[0]*720**2 + poly[1]*720 + poly[2], 
        poly[0]*0**2 + poly[1]*0 + poly[2] 
    )
    
def perspective_transform(src, dst, img, shape_tuple):
    M = cv2.getPerspectiveTransform(src, dst)
    warped = cv2.warpPerspective(img, M, shape_tuple)
    return M, warped

def to_float_arr(arg):
    return np.array(arg).astype(np.float32)

def lane_search(lane, xpts, ypts, img):
    if lane.found_in_prev_frame == True:
        dirx, diry, found_in_prev_frame = \
            lane.directed_search(xpts, ypts)
        
    if lane.found_in_prev_frame == False:
        dirx, diry, found_in_prev_frame = \
            lane.blind_histogram_search(xpts, ypts, img)
            
    return dirx, diry, found_in_prev_frame

def recalc_intercept(struct, val):
    struct.append(val)
    return np.mean(struct), struct

def pipeline(img):
    height = img.shape[0]
    width = img.shape[1]
    
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
        object_pts, img_points, (width, height), None, None
    )
    undistorted_img = cv2.undistort(img, mtx, dist, None, mtx)
    
    src = np.float32([[490, 482],[810, 482],
                      [1250, 720],[0, 720]])
    dst = np.float32([[0, 0], [1280, 0], 
                     [1250, 720],[40, 720]])
    
    M, warped = perspective_transform(src, dst, undistorted_img, (width, height))
    bin_thresholds_dict = {  
      'l': {'min': 215, 'max': 255},  
      'b': {'min': 145, 'max': 200} 
    } 
    
    B = cv2.cvtColor(warped, cv2.COLOR_RGB2Lab)[:,:,2]
    L = cv2.cvtColor(warped, cv2.COLOR_RGB2LUV)[:,:,0]  
    
    b_bin = np.zeros_like(B)
    b_bin[(B >= bin_thresholds_dict['b']['min']) & (B <= bin_thresholds_dict['b']['max'])] = 1
    
    l_bin = np.zeros_like(L)
    l_bin[(L >= bin_thresholds_dict['l']['min']) & (L <= bin_thresholds_dict['l']['max'])] = 1

    combined_binary = np.zeros_like(b_bin)
    combined_binary[(l_bin == 1) | (b_bin == 1)] = 1

    detected_x_pts, detected_y_pts = np.nonzero(np.transpose(combined_binary)) 
    
    leftx, lefty, Left_Lane.found_in_prev_frame = lane_search(
        Left_Lane, detected_x_pts, 
        detected_y_pts, combined_binary
    )
    
    rightx, righty, Right_Lane.found_in_prev_frame = lane_search(
        Right_Lane, detected_x_pts, 
        detected_y_pts, combined_binary
    )
    
    coordinates = [leftx, lefty, rightx, righty]
    [leftx, lefty, rightx, righty] = [
        to_float_arr(coordinates[i]) for i in range(len(coordinates))
    ]
            
    left_poly = np.polyfit(lefty, leftx, 2)
  
    (leftx_intercept, left_high) = poly_intercepts(left_poly)
    
    leftx_intercept, Left_Lane.x_intercept = recalc_intercept(
        Left_Lane.x_intercept, leftx_intercept
    )
    
    left_high, Left_Lane.high = recalc_intercept(
        Left_Lane.high, left_high
    )
    
    Left_Lane.lastx_intercept = leftx_intercept
    Left_Lane.last_high = left_high
    
    leftx = np.append(leftx, [leftx_intercept, left_high])
    lefty = np.append(lefty, [720, 0])
    
    leftx, lefty = Left_Lane.sort(leftx, lefty)
    
    Left_Lane.prev_x = leftx
    Left_Lane.prev_y = lefty
    
    
    left_poly = np.polyfit(lefty, leftx, 2)
    coupled_L = [
        (Left_Lane.c0, left_poly[0]),
        (Left_Lane.c1, left_poly[1]),
        (Left_Lane.c2, left_poly[2])
    ]
    for tup in coupled_L:
        tup[0].append(tup[1])
        
    left_poly = [np.mean(i[0]) for i in coupled_L]
    
    left_polyx = left_poly[0]*lefty**2 + left_poly[1]*lefty + left_poly[2]
    Left_Lane.polyx = left_polyx
    
    right_poly = np.polyfit(righty, rightx, 2)
    (rightx_intercept, right_high) = poly_intercepts(right_poly)

    rightx_intercept, Right_Lane.x_intercept = recalc_intercept(
        Right_Lane.x_intercept, rightx_intercept
    )
    
    right_high, Right_Lane.high = recalc_intercept(
        Right_Lane.high, right_high
    )
    
    Right_Lane.lastx_intercept = rightx_intercept
    Right_Lane.last_high = right_high
    
    rightx = np.append(rightx, [rightx_intercept, right_high])
    righty = np.append(righty, [720, 0])
    
    rightx, righty = Right_Lane.sort(rightx, righty)
    
    Right_Lane.prev_x = rightx
    Right_Lane.prev_y = righty
    
    right_poly = np.polyfit(righty, rightx, 2)
    coupled_R = [
        (Right_Lane.c0, right_poly[0]),
        (Right_Lane.c1, right_poly[1]),
        (Right_Lane.c2, right_poly[2])
    ]
    for tup in coupled_R:
        tup[0].append(tup[1])
        
    right_poly = [np.mean(i[0]) for i in coupled_R]
    
    right_polyx = right_poly[0]*righty**2 + right_poly[1]*righty + right_poly[2]
    Right_Lane.polyx = right_polyx
        
    left_curverad = Left_Lane.curve_rad(leftx, lefty)
    right_curverad = Right_Lane.curve_rad(rightx, righty)
        
    if Left_Lane.num_frames % 3 == 0:
        Left_Lane.curve_radius = left_curverad
        Right_Lane.curve_radius = right_curverad
        
    pos = (rightx_intercept + leftx_intercept) / 2
    distance = abs((640 - pos) * 3.7 / 700) 
                
    Minv = cv2.getPerspectiveTransform(dst, src)
    
    warp_zero = np.zeros_like(combined_binary).astype(np.uint8)
    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
    
    pts_left = np.array([np.flipud(np.transpose(np.vstack([Left_Lane.polyx, Left_Lane.prev_y])))])
    pts_right = np.array([np.transpose(np.vstack([right_polyx, Right_Lane.prev_y]))])
    pts = np.hstack((pts_left, pts_right))
    cv2.polylines(color_warp, np.int_([pts]), isClosed=False, color=(0,0,255), thickness = 40)
    cv2.fillPoly(color_warp, np.int_(pts), (34,255,34))
    newwarp = cv2.warpPerspective(color_warp, Minv, (width, height))
    result = cv2.addWeighted(undistorted_img, 1, newwarp, 0.5, 0)
    
    
    pos_txt = ''   
    
    if pos > 640:
        pos_txt = 'left'
    else: 
        pos_txt = 'right'
        
    distance_txt = '{:.2f}m'.format(distance)
        
    cv2.putText(result, 'vehicle is ' + distance_txt + ' ' + pos_txt + ' of center'.format(distance), (100,80),
                 fontFace = 16, fontScale = 2, color=(255,255,255), thickness = 2)
        
    cv2.putText(result, 'curve radius  {}(m)'.format(int((Left_Lane.curve_radius+Right_Lane.curve_radius)/2)), (120,140),
             fontFace = 16, fontScale = 2, color=(255,255,255), thickness = 2)
    Left_Lane.num_frames += 1
    return result
In [19]:
Left_Lane = Line()
Right_Lane = Line()
output = 'project_output_colour.mp4'
clip1 = VideoFileClip("project_video.mp4")
ccc = clip1.fl_image(pipeline) 
ccc.write_videofile(output, audio=False)
HTML("""
<video width="960" height="540" controls>
  <source src="{0}">
</video>
""".format(output))
[MoviePy] >>>> Building video project_output_colour.mp4
[MoviePy] Writing video project_output_colour.mp4
100%|█████████████████████████████████████▉| 1260/1261 [16:54<00:00,  1.24it/s]
[MoviePy] Done.
[MoviePy] >>>> Video ready: project_output_colour.mp4 

Out[19]:
In [ ]:
Left_Lane = Line()
Right_Lane = Line()
out = 'challenge_output_result.mp4'
clip1 = VideoFileClip("challenge_video.mp4")
cc = clip1.fl_image(pipeline) 
cc.write_videofile(out, audio=False)

HTML("""
<video width="640" height="360" controls>
  <source src="{0}">
</video>
""".format(out))

And so on and so forth...

In [ ]: